Added #ifdef CONFIG_XEN_SHADOW_MODE to protect all of Christian's new code.
Signed-off-by: michael.fetterman@cl.cam.ac.uk
space. Odds are that you want to say N here.
config XEN_WRITABLE_PAGETABLES
- bool
+ bool "writable page tables"
default y
+config XEN_SHADOW_MODE
+ bool "Fake shadow mode"
+ default n
+ help
+ Fakes out a shadow mode kernel.
+
+
config XEN_SCRUB_PAGES
bool "Scrub memory before freeing it to Xen"
default y
# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
# CONFIG_XEN_BLKDEV_TAP is not set
CONFIG_XEN_WRITABLE_PAGETABLES=y
+CONFIG_XEN_SHADOW_MODE=y
CONFIG_XEN_SCRUB_PAGES=y
CONFIG_X86=y
# CONFIG_X86_64 is not set
for (va = gdt_descr->address, f = 0;
va < gdt_descr->address + gdt_descr->size;
va += PAGE_SIZE, f++) {
+#ifndef CONFIG_XEN_SHADOW_MODE
+ frames[f] = virt_to_machine(va) >> PAGE_SHIFT;
+#else /* CONFIG_XEN_SHADOW_MODE */
frames[f] = __vms_virt_to_machine(va) >> PAGE_SHIFT;
+#endif /* CONFIG_XEN_SHADOW_MODE */
make_page_readonly((void *)va);
}
flush_page_update_queue();
cpumask_t mask;
preempt_disable();
#endif
-#if 0
+#ifndef CONFIG_XEN_SHADOW_MODE
make_pages_readonly(pc->ldt, (pc->size * LDT_ENTRY_SIZE) /
PAGE_SIZE);
-#endif
+#endif /* ! CONFIG_XEN_SHADOW_MODE */
load_LDT(pc);
flush_page_update_queue();
#ifdef CONFIG_SMP
#endif
}
if (oldsize) {
-#if 0
+#ifndef CONFIG_XEN_SHADOW_MODE
make_pages_writable(oldldt, (oldsize * LDT_ENTRY_SIZE) /
PAGE_SIZE);
-#endif
+#endif /* ! CONFIG_XEN_SHADOW_MODE */
flush_page_update_queue();
if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
vfree(oldldt);
if (err < 0)
return err;
memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
-#if 0
+#ifndef CONFIG_XEN_SHADOW_MODE
make_pages_readonly(new->ldt, (new->size * LDT_ENTRY_SIZE) /
PAGE_SIZE);
-#endif
+#endif /* ! CONFIG_XEN_SHADOW_MODE */
flush_page_update_queue();
return 0;
}
if (mm->context.size) {
if (mm == current->active_mm)
clear_LDT();
-#if 0
+#ifndef CONFIG_XEN_SHADOW_MODE
make_pages_writable(mm->context.ldt,
(mm->context.size * LDT_ENTRY_SIZE) /
PAGE_SIZE);
-#endif
+#endif /* ! CONFIG_XEN_SHADOW_MODE */
flush_page_update_queue();
if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
vfree(mm->context.ldt);
}
lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
+#ifndef CONFIG_XEN_SHADOW_MODE
+ mach_lp = arbitrary_virt_to_machine(lp);
+#else /* CONFIG_XEN_SHADOW_MODE */
mach_lp = arbitrary_virt_to_phys(lp);
+#endif /* CONFIG_XEN_SHADOW_MODE */
/* Allow LDTs to be cleared by the user. */
if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
static void
xen_contig_memory(unsigned long vstart, unsigned int order)
{
-#define HACK
-#ifndef HACK
+#ifndef CONFIG_XEN_SHADOW_MODE
/*
* Ensure multi-page extents are contiguous in machine memory.
* This code could be cleaned up some, and the number of
xen_tlb_flush();
balloon_unlock(flags);
-#endif
+#endif /* ! CONFIG_XEN_SHADOW_MODE */
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
* Load the per-thread Thread-Local Storage descriptor.
* This is load_TLS(next, cpu) with multicalls.
*/
+#ifndef CONFIG_XEN_SHADOW_MODE
+#define C_VIRT_TO_MACH virt_to_machine
+#else /* CONFIG_XEN_SHADOW_MODE */
+#define C_VIRT_TO_MACH virt_to_phys
+#endif
#define C(i) do { \
if (unlikely(next->tls_array[i].a != prev->tls_array[i].a || \
next->tls_array[i].b != prev->tls_array[i].b)) \
queue_multicall3(__HYPERVISOR_update_descriptor, \
- virt_to_phys(&get_cpu_gdt_table(cpu) \
+ C_VIRT_TO_MACH(&get_cpu_gdt_table(cpu) \
[GDT_ENTRY_TLS_MIN + i]), \
((u32 *)&next->tls_array[i])[0], \
((u32 *)&next->tls_array[i])[1]); \
} while (0)
C(0); C(1); C(2);
#undef C
+#undef C_VIRT_TO_MACH
if (xen_start_info.flags & SIF_PRIVILEGED) {
op.cmd = DOM0_IOPL;
shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
EXPORT_SYMBOL(HYPERVISOR_shared_info);
+#ifndef CONFIG_XEN_SHADOW_MODE
+unsigned int *phys_to_machine_mapping, *pfn_to_mfn_frame_list;
+EXPORT_SYMBOL(phys_to_machine_mapping);
+#else /* CONFIG_XEN_SHADOW_MODE */
unsigned int *__vms_phys_to_machine_mapping, *__vms_pfn_to_mfn_frame_list;
EXPORT_SYMBOL(__vms_phys_to_machine_mapping);
+#endif /* CONFIG_XEN_SHADOW_MODE */
DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
DEFINE_PER_CPU(int, nr_multicall_ents);
}
#endif
+#ifndef CONFIG_XEN_SHADOW_MODE
+ phys_to_machine_mapping = (unsigned int *)xen_start_info.mfn_list;
+#else /* CONFIG_XEN_SHADOW_MODE */
__vms_phys_to_machine_mapping = (unsigned int *)xen_start_info.mfn_list;
+#endif /* CONFIG_XEN_SHADOW_MODE */
return max_low_pfn;
}
/* Make sure we have a large enough P->M table. */
if (max_pfn > xen_start_info.nr_pages) {
+#ifndef CONFIG_XEN_SHADOW_MODE
+ phys_to_machine_mapping = alloc_bootmem_low_pages(
+#else /* CONFIG_XEN_SHADOW_MODE */
__vms_phys_to_machine_mapping = alloc_bootmem_low_pages(
+#endif /* CONFIG_XEN_SHADOW_MODE */
max_pfn * sizeof(unsigned long));
+#ifndef CONFIG_XEN_SHADOW_MODE
+ memset(phys_to_machine_mapping, ~0,
+#else /* CONFIG_XEN_SHADOW_MODE */
memset(__vms_phys_to_machine_mapping, ~0,
+#endif /* CONFIG_XEN_SHADOW_MODE */
max_pfn * sizeof(unsigned long));
+#ifndef CONFIG_XEN_SHADOW_MODE
+ memcpy(phys_to_machine_mapping,
+#else /* CONFIG_XEN_SHADOW_MODE */
memcpy(__vms_phys_to_machine_mapping,
+#endif /* CONFIG_XEN_SHADOW_MODE */
(unsigned long *)xen_start_info.mfn_list,
xen_start_info.nr_pages * sizeof(unsigned long));
free_bootmem(
sizeof(unsigned long))));
}
+#ifndef CONFIG_XEN_SHADOW_MODE
+ pfn_to_mfn_frame_list = alloc_bootmem_low_pages(PAGE_SIZE);
+#else /* CONFIG_XEN_SHADOW_MODE */
__vms_pfn_to_mfn_frame_list = alloc_bootmem_low_pages(PAGE_SIZE);
+#endif /* CONFIG_XEN_SHADOW_MODE */
for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
{
+#ifndef CONFIG_XEN_SHADOW_MODE
+ pfn_to_mfn_frame_list[j] =
+ virt_to_machine(&phys_to_machine_mapping[i]) >> PAGE_SHIFT;
+#else /* CONFIG_XEN_SHADOW_MODE */
__vms_pfn_to_mfn_frame_list[j] =
__vms_virt_to_machine(&__vms_phys_to_machine_mapping[i]) >> PAGE_SHIFT;
+#endif /* CONFIG_XEN_SHADOW_MODE */
}
HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list =
+#ifndef CONFIG_XEN_SHADOW_MODE
+ virt_to_machine(pfn_to_mfn_frame_list) >> PAGE_SHIFT;
+#else /* CONFIG_XEN_SHADOW_MODE */
__vms_virt_to_machine(__vms_pfn_to_mfn_frame_list) >> PAGE_SHIFT;
+#endif /* CONFIG_XEN_SHADOW_MODE */
/*
printk("%08lx\n", regs->eip);
page = ((unsigned long *) per_cpu(cur_pgd, smp_processor_id()))
[address >> 22];
+#ifndef CONFIG_XEN_SHADOW_MODE
+ printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
+ machine_to_phys(page));
+#else /* CONFIG_XEN_SHADOW_MODE */
printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n",
__vms_phys_to_machine(page), page);
+#endif /* CONFIG_XEN_SHADOW_MODE */
/*
* We must not directly access the pte in the highpte
* case, the page table might be allocated in highmem.
if (page & 1) {
page &= PAGE_MASK;
address &= 0x003ff000;
+#ifndef CONFIG_XEN_SHADOW_MODE
+ page = machine_to_phys(page);
+#endif /* ! CONFIG_XEN_SHADOW_MODE */
page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
+#ifndef CONFIG_XEN_SHADOW_MODE
+ printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
+ machine_to_phys(page));
+#else /* CONFIG_XEN_SHADOW_MODE */
printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n",
__vms_phys_to_machine(page), page);
+#endif /* CONFIG_XEN_SHADOW_MODE */
}
#endif
show_trace(NULL, (unsigned long *)®s[1]);
#ifdef CONFIG_SMP
#define QUEUE_SIZE 1
#else
+#ifndef CONFIG_XEN_SHADOW_MODE
+#define QUEUE_SIZE 128
+#else /* CONFIG_XEN_SHADOW_MODE */
#define QUEUE_SIZE 1
+#endif /* CONFIG_XEN_SHADOW_MODE */
#endif
#endif
void queue_l1_entry_update(pte_t *ptr, unsigned long val)
{
+#ifndef CONFIG_XEN_SHADOW_MODE
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).val = val;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+#else /* CONFIG_XEN_SHADOW_MODE */
set_pte(ptr, __pte(val));
+#endif /* CONFIG_XEN_SHADOW_MODE */
}
void queue_l2_entry_update(pmd_t *ptr, unsigned long val)
{
+#ifndef CONFIG_XEN_SHADOW_MODE
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).val = val;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+#else /* CONFIG_XEN_SHADOW_MODE */
set_pmd(ptr, __pmd(val));
+#endif /* CONFIG_XEN_SHADOW_MODE */
}
void queue_pt_switch(unsigned long ptr)
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
idx = per_cpu(mmu_update_queue_idx, cpu);
+#ifndef CONFIG_XEN_SHADOW_MODE
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+#else /* CONFIG_XEN_SHADOW_MODE */
per_cpu(update_queue[idx], cpu).ptr = __vms_phys_to_machine(ptr);
+#endif /* CONFIG_XEN_SHADOW_MODE */
per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
per_cpu(update_queue[idx], cpu).val = MMUEXT_NEW_BASEPTR;
increment_index();
spin_unlock_irqrestore(&update_lock, flags);
}
+#ifndef CONFIG_XEN_SHADOW_MODE
+void queue_pgd_pin(unsigned long ptr)
+#else /* CONFIG_XEN_SHADOW_MODE */
void __vms_queue_pgd_pin(unsigned long ptr)
+#endif /* CONFIG_XEN_SHADOW_MODE */
{
int cpu = smp_processor_id();
int idx;
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
idx = per_cpu(mmu_update_queue_idx, cpu);
+#ifndef CONFIG_XEN_SHADOW_MODE
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+#else /* CONFIG_XEN_SHADOW_MODE */
per_cpu(update_queue[idx], cpu).ptr = __vms_phys_to_machine(ptr);
+#endif /* CONFIG_XEN_SHADOW_MODE */
per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L2_TABLE;
increment_index();
spin_unlock_irqrestore(&update_lock, flags);
}
+#ifndef CONFIG_XEN_SHADOW_MODE
+void queue_pgd_unpin(unsigned long ptr)
+#else /* CONFIG_XEN_SHADOW_MODE */
void __vms_queue_pgd_unpin(unsigned long ptr)
+#endif /* CONFIG_XEN_SHADOW_MODE */
{
int cpu = smp_processor_id();
int idx;
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
idx = per_cpu(mmu_update_queue_idx, cpu);
+#ifndef CONFIG_XEN_SHADOW_MODE
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+#else /* CONFIG_XEN_SHADOW_MODE */
per_cpu(update_queue[idx], cpu).ptr = __vms_phys_to_machine(ptr);
+#endif /* CONFIG_XEN_SHADOW_MODE */
per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
increment_index();
spin_unlock_irqrestore(&update_lock, flags);
}
+#ifndef CONFIG_XEN_SHADOW_MODE
+void queue_pte_pin(unsigned long ptr)
+#else /* CONFIG_XEN_SHADOW_MODE */
void __vms_queue_pte_pin(unsigned long ptr)
+#endif /* CONFIG_XEN_SHADOW_MODE */
{
int cpu = smp_processor_id();
int idx;
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
idx = per_cpu(mmu_update_queue_idx, cpu);
+#ifndef CONFIG_XEN_SHADOW_MODE
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+#else /* CONFIG_XEN_SHADOW_MODE */
per_cpu(update_queue[idx], cpu).ptr = __vms_phys_to_machine(ptr);
+#endif /* CONFIG_XEN_SHADOW_MODE */
per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L1_TABLE;
increment_index();
spin_unlock_irqrestore(&update_lock, flags);
}
+#ifndef CONFIG_XEN_SHADOW_MODE
+void queue_pte_unpin(unsigned long ptr)
+#else /* CONFIG_XEN_SHADOW_MODE */
void __vms_queue_pte_unpin(unsigned long ptr)
+#endif /* CONFIG_XEN_SHADOW_MODE */
{
int cpu = smp_processor_id();
int idx;
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
idx = per_cpu(mmu_update_queue_idx, cpu);
+#ifndef CONFIG_XEN_SHADOW_MODE
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+#else /* CONFIG_XEN_SHADOW_MODE */
per_cpu(update_queue[idx], cpu).ptr = __vms_phys_to_machine(ptr);
+#endif /* CONFIG_XEN_SHADOW_MODE */
per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
increment_index();
/* queue and flush versions of the above */
void xen_l1_entry_update(pte_t *ptr, unsigned long val)
{
+#ifndef CONFIG_XEN_SHADOW_MODE
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).val = val;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+#else /* CONFIG_XEN_SHADOW_MODE */
set_pte(ptr, __pte(val));
+#endif /* CONFIG_XEN_SHADOW_MODE */
}
void xen_l2_entry_update(pmd_t *ptr, unsigned long val)
{
+#ifndef CONFIG_XEN_SHADOW_MODE
+ int cpu = smp_processor_id();
+ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ idx = per_cpu(mmu_update_queue_idx, cpu);
+ per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
+ per_cpu(update_queue[idx], cpu).val = val;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+#else /* CONFIG_XEN_SHADOW_MODE */
set_pmd(ptr, __pmd(val));
+#endif /* CONFIG_XEN_SHADOW_MODE */
}
void xen_pt_switch(unsigned long ptr)
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
idx = per_cpu(mmu_update_queue_idx, cpu);
+#ifndef CONFIG_XEN_SHADOW_MODE
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+#else /* CONFIG_XEN_SHADOW_MODE */
per_cpu(update_queue[idx], cpu).ptr = __vms_phys_to_machine(ptr);
+#endif /* CONFIG_XEN_SHADOW_MODE */
per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
per_cpu(update_queue[idx], cpu).val = MMUEXT_NEW_BASEPTR;
increment_index_and_flush();
spin_unlock_irqrestore(&update_lock, flags);
}
+#ifndef CONFIG_XEN_SHADOW_MODE
+void xen_pgd_pin(unsigned long ptr)
+#else /* CONFIG_XEN_SHADOW_MODE */
void __vms_xen_pgd_pin(unsigned long ptr)
+#endif /* CONFIG_XEN_SHADOW_MODE */
{
int cpu = smp_processor_id();
int idx;
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
idx = per_cpu(mmu_update_queue_idx, cpu);
+#ifndef CONFIG_XEN_SHADOW_MODE
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+#else /* CONFIG_XEN_SHADOW_MODE */
per_cpu(update_queue[idx], cpu).ptr = __vms_phys_to_machine(ptr);
+#endif /* CONFIG_XEN_SHADOW_MODE */
per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L2_TABLE;
increment_index_and_flush();
spin_unlock_irqrestore(&update_lock, flags);
}
+#ifndef CONFIG_XEN_SHADOW_MODE
+void xen_pgd_unpin(unsigned long ptr)
+#else /* CONFIG_XEN_SHADOW_MODE */
void __vms_xen_pgd_unpin(unsigned long ptr)
+#endif /* CONFIG_XEN_SHADOW_MODE */
{
int cpu = smp_processor_id();
int idx;
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
idx = per_cpu(mmu_update_queue_idx, cpu);
+#ifndef CONFIG_XEN_SHADOW_MODE
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+#else /* CONFIG_XEN_SHADOW_MODE */
per_cpu(update_queue[idx], cpu).ptr = __vms_phys_to_machine(ptr);
+#endif /* CONFIG_XEN_SHADOW_MODE */
per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
increment_index_and_flush();
spin_unlock_irqrestore(&update_lock, flags);
}
+#ifndef CONFIG_XEN_SHADOW_MODE
+void xen_pte_pin(unsigned long ptr)
+#else /* CONFIG_XEN_SHADOW_MODE */
void __vms_xen_pte_pin(unsigned long ptr)
+#endif /* CONFIG_XEN_SHADOW_MODE */
{
int cpu = smp_processor_id();
int idx;
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
idx = per_cpu(mmu_update_queue_idx, cpu);
+#ifndef CONFIG_XEN_SHADOW_MODE
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+#else /* CONFIG_XEN_SHADOW_MODE */
per_cpu(update_queue[idx], cpu).ptr = __vms_phys_to_machine(ptr);
+#endif /* CONFIG_XEN_SHADOW_MODE */
per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L1_TABLE;
increment_index_and_flush();
spin_unlock_irqrestore(&update_lock, flags);
}
+#ifndef CONFIG_XEN_SHADOW_MODE
+void xen_pte_unpin(unsigned long ptr)
+#else /* CONFIG_XEN_SHADOW_MODE */
void __vms_xen_pte_unpin(unsigned long ptr)
+#endif /* CONFIG_XEN_SHADOW_MODE */
{
int cpu = smp_processor_id();
int idx;
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
idx = per_cpu(mmu_update_queue_idx, cpu);
+#ifndef CONFIG_XEN_SHADOW_MODE
+ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
+#else /* CONFIG_XEN_SHADOW_MODE */
per_cpu(update_queue[idx], cpu).ptr = __vms_phys_to_machine(ptr);
+#endif /* CONFIG_XEN_SHADOW_MODE */
per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
increment_index_and_flush();
pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
pfn_array[i] = pte->pte_low >> PAGE_SHIFT;
queue_l1_entry_update(pte, 0);
+#ifndef CONFIG_XEN_SHADOW_MODE
+ phys_to_machine_mapping[__pa(vstart)>>PAGE_SHIFT] = INVALID_P2M_ENTRY;
+#else /* CONFIG_XEN_SHADOW_MODE */
__vms_phys_to_machine_mapping[__pa(vstart)>>PAGE_SHIFT] = INVALID_P2M_ENTRY;
+#endif /* CONFIG_XEN_SHADOW_MODE */
}
/* Flush updates through and flush the TLB. */
{
if (pmd_none(*pmd)) {
pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- //make_page_readonly(page_table);
+#ifndef CONFIG_XEN_SHADOW_MODE
+ make_page_readonly(page_table);
+#endif /* ! CONFIG_XEN_SHADOW_MODE */
set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
if (page_table != pte_offset_kernel(pmd, 0))
BUG();
* it. We clean up by write-enabling and then freeing the old page dir.
*/
memcpy(new_pgd, old_pgd, PTRS_PER_PGD_NO_HV*sizeof(pgd_t));
+#ifndef CONFIG_XEN_SHADOW_MODE
+ make_page_readonly(new_pgd);
+ queue_pgd_pin(__pa(new_pgd));
+#endif /* ! CONFIG_XEN_SHADOW_MODE */
load_cr3(new_pgd);
+#ifndef CONFIG_XEN_SHADOW_MODE
+ queue_pgd_unpin(__pa(old_pgd));
+#endif /* ! CONFIG_XEN_SHADOW_MODE */
__flush_tlb_all(); /* implicit flush */
- //make_page_writable(old_pgd);
+#ifndef CONFIG_XEN_SHADOW_MODE
+ make_page_writable(old_pgd);
+#endif /* ! CONFIG_XEN_SHADOW_MODE */
flush_page_update_queue();
free_bootmem(__pa(old_pgd), PAGE_SIZE);
/* Switch to the real shared_info page, and clear the dummy page. */
flush_page_update_queue();
+#ifndef CONFIG_XEN_SHADOW_MODE
+ set_fixmap_ma(FIX_SHARED_INFO, xen_start_info.shared_info);
+#else /* CONFIG_XEN_SHADOW_MODE */
printk("xen_start_info.shared_info=%x\n", xen_start_info.shared_info);
set_fixmap(FIX_SHARED_INFO, xen_start_info.shared_info);
+#endif /* CONFIG_XEN_SHADOW_MODE */
HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
memset(empty_zero_page, 0, sizeof(empty_zero_page));
#ifdef CONFIG_XEN_PHYSDEV_ACCESS
/* Setup mapping of lower 1st MB */
for (i = 0; i < NR_FIX_ISAMAPS; i++)
+#ifndef CONFIG_XEN_SHADOW_MODE
+ if (xen_start_info.flags & SIF_PRIVILEGED)
+ set_fixmap_ma(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
+ else
+ set_fixmap_ma_ro(FIX_ISAMAP_BEGIN - i,
+ virt_to_machine(empty_zero_page));
+#else /* CONFIG_XEN_SHADOW_MODE */
if (xen_start_info.flags & SIF_PRIVILEGED)
__vms_set_fixmap_ma(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
else
__vms_set_fixmap_ma_ro(FIX_ISAMAP_BEGIN - i,
__vms_virt_to_machine(empty_zero_page));
+#endif /* CONFIG_XEN_SHADOW_MODE */
#endif
}
static inline int is_local_lowmem(unsigned long address)
{
extern unsigned long max_low_pfn;
+#ifndef CONFIG_XEN_SHADOW_MODE
+ unsigned long mfn = address >> PAGE_SHIFT;
+ unsigned long pfn = mfn_to_pfn(mfn);
+ return ((pfn < max_low_pfn) && (pfn_to_mfn(pfn) == mfn));
+#else /* CONFIG_XEN_SHADOW_MODE */
unsigned long pfn = address >> PAGE_SHIFT;
return (pfn < max_low_pfn);
+#endif /* CONFIG_XEN_SHADOW_MODE */
}
/*
/*
* Don't allow anybody to remap normal RAM that we're using..
*/
-#if 0
+#ifndef CONFIG_XEN_SHADOW_MODE
if (is_local_lowmem(phys_addr)) {
char *t_addr, *t_end;
struct page *page;
domid = DOMID_LOCAL;
}
-#endif
+#endif /* ! CONFIG_XEN_SHADOW_MODE */
/*
* Mappings have to be page-aligned
*/
idx = FIX_BTMAP_BEGIN;
while (nrpages > 0) {
+#ifndef CONFIG_XEN_SHADOW_MODE
+ set_fixmap_ma(idx, phys_addr);
+#else /* CONFIG_XEN_SHADOW_MODE */
__vms_set_fixmap_ma(idx, phys_addr);
+#endif /* CONFIG_XEN_SHADOW_MODE */
phys_addr += PAGE_SIZE;
--idx;
--nrpages;
BUG();
do {
+#ifndef CONFIG_XEN_SHADOW_MODE
+ (*v)->ptr = virt_to_machine(pte);
+#else /* CONFIG_XEN_SHADOW_MODE */
(*v)->ptr = __vms_virt_to_machine(pte);
+#endif /* CONFIG_XEN_SHADOW_MODE */
(*v)++;
address += PAGE_SIZE;
pte++;
mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;
v = w = &u[0];
- if (0 && domid != DOMID_LOCAL) {
+#ifndef CONFIG_XEN_SHADOW_MODE
+ if (domid != DOMID_LOCAL) {
u[0].ptr = MMU_EXTENDED_COMMAND;
u[0].val = MMUEXT_SET_FOREIGNDOM;
u[0].val |= (unsigned long)domid << 16;
v = w = &u[1];
}
+#endif /* CONFIG_XEN_SHADOW_MODE */
start_address = address;
* Fill in the machine address: PTE ptr is done later by
* __direct_remap_area_pages().
*/
+#ifndef CONFIG_XEN_SHADOW_MODE
+ v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot);
+
+#else /* CONFIG_XEN_SHADOW_MODE */
{
mmu_update_t update;
int success = 0;
v->val = (ppfn << PAGE_SHIFT) | pgprot_val(prot);
}
+#endif /* CONFIG_XEN_SHADOW_MODE */
machine_addr += PAGE_SIZE;
address += PAGE_SIZE;
v++;
if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
pte_t old = *kpte;
pte_t standard = mk_pte(page, PAGE_KERNEL);
+#ifndef CONFIG_XEN_SHADOW_MODE
+ set_pte_batched(kpte, mk_pte(page, prot));
+#else /* CONFIG_XEN_SHADOW_MODE */
set_pte_atomic(kpte, mk_pte(page, prot));
+#endif /* CONFIG_XEN_SHADOW_MODE */
if (pte_same(old,standard))
get_page(kpte_page);
} else {
set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL));
}
} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
+#ifndef CONFIG_XEN_SHADOW_MODE
+ set_pte_batched(kpte, mk_pte(page, PAGE_KERNEL));
+#else /* CONFIG_XEN_SHADOW_MODE */
set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
+#endif /* CONFIG_XEN_SHADOW_MODE */
__put_page(kpte_page);
}
if (err)
break;
}
+#ifndef CONFIG_XEN_SHADOW_MODE
+ flush_page_update_queue();
+#endif /* ! CONFIG_XEN_SHADOW_MODE */
spin_unlock_irqrestore(&cpa_lock, flags);
return err;
}
* Associate a virtual page frame with a given physical page frame
* and protection flags for that frame.
*/
+#ifndef CONFIG_XEN_SHADOW_MODE
+static void set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
+#else /* CONFIG_XEN_SHADOW_MODE */
static void __vms_set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
+#endif /* CONFIG_XEN_SHADOW_MODE */
pgprot_t flags)
{
pgd_t *pgd;
}
pte = pte_offset_kernel(pmd, vaddr);
/* <pfn,flags> stored as-is, to permit clearing entries */
+#ifndef CONFIG_XEN_SHADOW_MODE
+ set_pte(pte, pfn_pte_ma(pfn, flags));
+#else /* CONFIG_XEN_SHADOW_MODE */
{
mmu_update_t update;
int success = 0;
BUG();
set_pte(pte, pfn_pte(ppfn, flags));
}
+#endif /* CONFIG_XEN_SHADOW_MODE */
/*
* It's enough to flush this one mapping.
set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
}
+#ifndef CONFIG_XEN_SHADOW_MODE
+void __set_fixmap_ma (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
+#else /* CONFIG_XEN_SHADOW_MODE */
void __vms___set_fixmap_ma (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
+#endif /* CONFIG_XEN_SHADOW_MODE */
{
unsigned long address = __fix_to_virt(idx);
BUG();
return;
}
+#ifndef CONFIG_XEN_SHADOW_MODE
+ set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
+#else /* CONFIG_XEN_SHADOW_MODE */
__vms_set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
+#endif /* CONFIG_XEN_SHADOW_MODE */
}
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
if (pte) {
clear_page(pte);
+#ifndef CONFIG_XEN_SHADOW_MODE
+ make_page_readonly(pte);
+ xen_flush_page_update_queue();
+#endif /* ! CONFIG_XEN_SHADOW_MODE */
}
return pte;
}
set_page_count(page, 1);
clear_page(pte);
+#ifndef CONFIG_XEN_SHADOW_MODE
+ make_page_readonly(pte);
+ queue_pte_pin(__pa(pte));
+ flush_page_update_queue();
+#endif /* ! CONFIG_XEN_SHADOW_MODE */
}
void pte_dtor(void *pte, kmem_cache_t *cache, unsigned long unused)
struct page *page = virt_to_page(pte);
ClearPageForeign(page);
+#ifndef CONFIG_XEN_SHADOW_MODE
+ queue_pte_unpin(__pa(pte));
+ make_page_writable(pte);
+ flush_page_update_queue();
+#endif /* ! CONFIG_XEN_SHADOW_MODE */
}
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
if (pte < highmem_start_page)
#endif
kmem_cache_free(pte_cache,
+#ifndef CONFIG_XEN_SHADOW_MODE
+ phys_to_virt(page_to_pseudophys(pte)));
+#else /* CONFIG_XEN_SHADOW_MODE */
phys_to_virt(__vms_page_to_pseudophys(pte)));
+#endif /* CONFIG_XEN_SHADOW_MODE */
#ifdef CONFIG_HIGHPTE
else
__free_page(pte);
spin_unlock_irqrestore(&pgd_lock, flags);
memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
out:
+#ifndef CONFIG_XEN_SHADOW_MODE
+ make_page_readonly(pgd);
+ queue_pgd_pin(__pa(pgd));
+ flush_page_update_queue();
+#else /* CONFIG_XEN_SHADOW_MODE */
;
+#endif /* CONFIG_XEN_SHADOW_MODE */
}
/* never called when PTRS_PER_PMD > 1 */
{
unsigned long flags; /* can be called from interrupt context */
+#ifndef CONFIG_XEN_SHADOW_MODE
+ queue_pgd_unpin(__pa(pgd));
+ make_page_writable(pgd);
+ flush_page_update_queue();
+#endif /* ! CONFIG_XEN_SHADOW_MODE */
+
if (PTRS_PER_PMD > 1)
return;
pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
-#if 0
+#ifndef CONFIG_XEN_SHADOW_MODE
if ( (unsigned long)va >= (unsigned long)high_memory )
{
unsigned long phys;
- phys = __vms_machine_to_phys(*(unsigned long *)pte & PAGE_MASK);
+ phys = machine_to_phys(*(unsigned long *)pte & PAGE_MASK);
#ifdef CONFIG_HIGHMEM
if ( (phys >> PAGE_SHIFT) < highstart_pfn )
#endif
make_lowmem_page_readonly(phys_to_virt(phys));
}
-#endif
+#endif /* ! CONFIG_XEN_SHADOW_MODE */
}
void make_page_writable(void *va)
if ( (unsigned long)va >= (unsigned long)high_memory )
{
unsigned long phys;
+#ifndef CONFIG_XEN_SHADOW_MODE
+ phys = machine_to_phys(*(unsigned long *)pte & PAGE_MASK);
+#else /* CONFIG_XEN_SHADOW_MODE */
phys = __vms_machine_to_phys(*(unsigned long *)pte & PAGE_MASK);
+#endif /* CONFIG_XEN_SHADOW_MODE */
#ifdef CONFIG_HIGHMEM
if ( (phys >> PAGE_SHIFT) < highstart_pfn )
#endif
extern void time_suspend(void);
extern void time_resume(void);
extern unsigned long max_pfn;
+#ifndef CONFIG_XEN_SHADOW_MODE
+ extern unsigned int *pfn_to_mfn_frame_list;
+#else /* CONFIG_XEN_SHADOW_MODE */
extern unsigned int *__vms_pfn_to_mfn_frame_list;
+#endif /* CONFIG_XEN_SHADOW_MODE */
suspend_record = (suspend_record_t *)__get_free_page(GFP_KERNEL);
if ( suspend_record == NULL )
memcpy(&suspend_record->resume_info, &xen_start_info, sizeof(xen_start_info));
+#ifndef CONFIG_XEN_SHADOW_MODE
+ HYPERVISOR_suspend(virt_to_machine(suspend_record) >> PAGE_SHIFT);
+#else /* CONFIG_XEN_SHADOW_MODE */
HYPERVISOR_suspend(__vms_virt_to_machine(suspend_record) >> PAGE_SHIFT);
+#endif /* CONFIG_XEN_SHADOW_MODE */
HYPERVISOR_vm_assist(VMASST_CMD_enable,
VMASST_TYPE_4gb_segments);
memcpy(&xen_start_info, &suspend_record->resume_info, sizeof(xen_start_info));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) && !defined(CONFIG_XEN_SHADOW_MODE)
+ set_fixmap_ma(FIX_SHARED_INFO, xen_start_info.shared_info);
+#else
set_fixmap(FIX_SHARED_INFO, xen_start_info.shared_info);
+#endif
HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
{
+#ifndef CONFIG_XEN_SHADOW_MODE
+ pfn_to_mfn_frame_list[j] =
+ virt_to_machine(&phys_to_machine_mapping[i]) >> PAGE_SHIFT;
+#else /* CONFIG_XEN_SHADOW_MODE */
__vms_pfn_to_mfn_frame_list[j] =
__vms_virt_to_machine(&__vms_phys_to_machine_mapping[i]) >> PAGE_SHIFT;
+#endif /* CONFIG_XEN_SHADOW_MODE */
}
HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list =
+#ifndef CONFIG_XEN_SHADOW_MODE
+ virt_to_machine(pfn_to_mfn_frame_list) >> PAGE_SHIFT;
+#else /* CONFIG_XEN_SHADOW_MODE */
__vms_virt_to_machine(__vms_pfn_to_mfn_frame_list) >> PAGE_SHIFT;
+#endif /* CONFIG_XEN_SHADOW_MODE */
irq_resume();
BUG();
pfn = page - mem_map;
+#ifndef CONFIG_XEN_SHADOW_MODE
+ if ( phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY )
+#else /* CONFIG_XEN_SHADOW_MODE */
if ( __vms_phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY )
+#endif /* CONFIG_XEN_SHADOW_MODE */
BUG();
/* Update P->M and M->P tables. */
+#ifndef CONFIG_XEN_SHADOW_MODE
+ phys_to_machine_mapping[pfn] = mfn_list[i];
+#else /* CONFIG_XEN_SHADOW_MODE */
__vms_phys_to_machine_mapping[pfn] = mfn_list[i];
+#endif /* CONFIG_XEN_SHADOW_MODE */
queue_machphys_update(mfn_list[i], pfn);
/* Link back into the page tables if it's not a highmem page. */
}
pfn = page - mem_map;
+#ifndef CONFIG_XEN_SHADOW_MODE
+ mfn_list[i] = phys_to_machine_mapping[pfn];
+ phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY;
+#else /* CONFIG_XEN_SHADOW_MODE */
mfn_list[i] = __vms_phys_to_machine_mapping[pfn];
__vms_phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY;
+#endif /* CONFIG_XEN_SHADOW_MODE */
if ( !PageHighMem(page) )
{
#else
mcl[i].args[3] = blkif->domid;
#endif
+#ifndef CONFIG_XEN_SHADOW_MODE
+ phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
+#else /* CONFIG_XEN_SHADOW_MODE */
__vms_phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
+#endif /* CONFIG_XEN_SHADOW_MODE */
FOREIGN_FRAME(phys_seg[i].buffer >> PAGE_SHIFT);
}
xreq->sector_number = req->sector_number;
for ( i = 0; i < req->nr_segments; i++ )
+#ifndef CONFIG_XEN_SHADOW_MODE
+ xreq->frame_and_sects[i] = machine_to_phys(req->frame_and_sects[i]);
+#else /* CONFIG_XEN_SHADOW_MODE */
xreq->frame_and_sects[i] = __vms_machine_to_phys(req->frame_and_sects[i]);
+#endif /* CONFIG_XEN_SHADOW_MODE */
}
static inline void translate_req_to_mfn(blkif_request_t *xreq,
xreq->sector_number = req->sector_number;
for ( i = 0; i < req->nr_segments; i++ )
+#ifndef CONFIG_XEN_SHADOW_MODE
+ xreq->frame_and_sects[i] = phys_to_machine(req->frame_and_sects[i]);
+#else /* CONFIG_XEN_SHADOW_MODE */
xreq->frame_and_sects[i] = __vms_phys_to_machine(req->frame_and_sects[i]);
+#endif /* CONFIG_XEN_SHADOW_MODE */
}
blkif_fe_interface_connect_t *msg = (void*)cmsg.msg;
msg->handle = 0;
+#ifndef CONFIG_XEN_SHADOW_MODE
+ msg->shmem_frame = (virt_to_machine(blk_ring.sring) >> PAGE_SHIFT);
+#else /* CONFIG_XEN_SHADOW_MODE */
msg->shmem_frame = (__vms_virt_to_machine(blk_ring.sring) >> PAGE_SHIFT);
+#endif /* CONFIG_XEN_SHADOW_MODE */
ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
}
for ( i = 0; i < req->nr_segments; i++ )
{
unsigned long pfn = req->frame_and_sects[i] >> PAGE_SHIFT;
+#ifndef CONFIG_XEN_SHADOW_MODE
+ unsigned long mfn = phys_to_machine_mapping[pfn];
+#else /* CONFIG_XEN_SHADOW_MODE */
unsigned long mfn = __vms_phys_to_machine_mapping[pfn];
+#endif /* CONFIG_XEN_SHADOW_MODE */
xen_machphys_update(mfn, pfn);
}
break;
memset(&req, 0, sizeof(req));
req.operation = BLKIF_OP_PROBE;
req.nr_segments = 1;
+#ifndef CONFIG_XEN_SHADOW_MODE
+ req.frame_and_sects[0] = virt_to_machine(buf) | 7;
+#else /* CONFIG_XEN_SHADOW_MODE */
req.frame_and_sects[0] = __vms_virt_to_machine(buf) | 7;
+#endif /* CONFIG_XEN_SHADOW_MODE */
blkif_control_send(&req, &rsp);
{
netif = netdev_priv(skb->dev);
vdata = (unsigned long)skb->data;
+#ifndef CONFIG_XEN_SHADOW_MODE
+ mdata = virt_to_machine(vdata);
+#else /* CONFIG_XEN_SHADOW_MODE */
mdata = __vms_virt_to_machine(vdata);
+#endif /* CONFIG_XEN_SHADOW_MODE */
/* Memory squeeze? Back off for an arbitrary while. */
if ( (new_mfn = alloc_mfn()) == 0 )
* Set the new P2M table entry before reassigning the old data page.
* Heed the comment in pgtable-2level.h:pte_page(). :-)
*/
+#ifndef CONFIG_XEN_SHADOW_MODE
+ phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;
+#else /* CONFIG_XEN_SHADOW_MODE */
__vms_phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;
+#endif /* CONFIG_XEN_SHADOW_MODE */
mmu[0].ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
mmu[0].val = __pa(vdata) >> PAGE_SHIFT;
continue;
}
+#ifndef CONFIG_XEN_SHADOW_MODE
+ phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
+#else /* CONFIG_XEN_SHADOW_MODE */
__vms_phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
+#endif /* CONFIG_XEN_SHADOW_MODE */
FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);
data_len = (txreq.size > PKT_PROT_LEN) ? PKT_PROT_LEN : txreq.size;
np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id;
+#ifndef CONFIG_XEN_SHADOW_MODE
+ rx_pfn_array[i] = virt_to_machine(skb->head) >> PAGE_SHIFT;
+#else /* CONFIG_XEN_SHADOW_MODE */
rx_pfn_array[i] = __vms_virt_to_machine(skb->head) >> PAGE_SHIFT;
+#endif /* CONFIG_XEN_SHADOW_MODE */
/* Remove this page from pseudo phys map before passing back to Xen. */
+#ifndef CONFIG_XEN_SHADOW_MODE
+ phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT]
+#else /* CONFIG_XEN_SHADOW_MODE */
__vms_phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT]
+#endif /* CONFIG_XEN_SHADOW_MODE */
= INVALID_P2M_ENTRY;
rx_mcl[i].op = __HYPERVISOR_update_va_mapping;
tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;
tx->id = id;
+#ifndef CONFIG_XEN_SHADOW_MODE
+ tx->addr = virt_to_machine(skb->data);
+#else /* CONFIG_XEN_SHADOW_MODE */
tx->addr = __vms_virt_to_machine(skb->data);
+#endif /* CONFIG_XEN_SHADOW_MODE */
tx->size = skb->len;
wmb(); /* Ensure that backend will see the request. */
mcl->args[2] = 0;
mcl++;
+#ifndef CONFIG_XEN_SHADOW_MODE
+ phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] =
+#else /* CONFIG_XEN_SHADOW_MODE */
__vms_phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] =
+#endif /* CONFIG_XEN_SHADOW_MODE */
rx->addr >> PAGE_SHIFT;
__skb_queue_tail(&rxq, skb);
tx = &np->tx->ring[requeue_idx++].req;
tx->id = i;
+#ifndef CONFIG_XEN_SHADOW_MODE
+ tx->addr = virt_to_machine(skb->data);
+#else /* CONFIG_XEN_SHADOW_MODE */
tx->addr = __vms_virt_to_machine(skb->data);
+#endif /* CONFIG_XEN_SHADOW_MODE */
tx->size = skb->len;
np->stats.tx_bytes += skb->len;
netif_fe_interface_connect_t *msg = (void*)cmsg.msg;
msg->handle = np->handle;
+#ifndef CONFIG_XEN_SHADOW_MODE
+ msg->tx_shmem_frame = (virt_to_machine(np->tx) >> PAGE_SHIFT);
+ msg->rx_shmem_frame = (virt_to_machine(np->rx) >> PAGE_SHIFT);
+#else /* CONFIG_XEN_SHADOW_MODE */
msg->tx_shmem_frame = (__vms_virt_to_machine(np->tx) >> PAGE_SHIFT);
msg->rx_shmem_frame = (__vms_virt_to_machine(np->rx) >> PAGE_SHIFT);
+#endif /* CONFIG_XEN_SHADOW_MODE */
ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
}
case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN:
{
+#ifndef CONFIG_XEN_SHADOW_MODE
+ unsigned long m2pv = (unsigned long)machine_to_phys_mapping;
+#else /* CONFIG_XEN_SHADOW_MODE */
unsigned long m2pv = (unsigned long)__vms_machine_to_phys_mapping;
+#endif /* CONFIG_XEN_SHADOW_MODE */
pgd_t *pgd = pgd_offset_k(m2pv);
pmd_t *pmd = pmd_offset(pgd, m2pv);
unsigned long m2p_start_mfn = pmd_val(*pmd) >> PAGE_SHIFT;
static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
{
+#ifndef CONFIG_XEN_SHADOW_MODE
+#define C(i) HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), ((u32 *)&t->tls_array[i])[0], ((u32 *)&t->tls_array[i])[1])
+#else /* CONFIG_XEN_SHADOW_MODE */
#define C(i) HYPERVISOR_update_descriptor(__pa(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), ((u32 *)&t->tls_array[i])[0], ((u32 *)&t->tls_array[i])[1])
+#endif /* CONFIG_XEN_SHADOW_MODE */
C(0); C(1); C(2);
#undef C
}
extern void __set_fixmap (enum fixed_addresses idx,
unsigned long phys, pgprot_t flags);
+#ifndef CONFIG_XEN_SHADOW_MODE
+extern void __set_fixmap_ma (enum fixed_addresses idx,
+#else /* CONFIG_XEN_SHADOW_MODE */
extern void __vms___set_fixmap_ma (enum fixed_addresses idx,
+#endif /* CONFIG_XEN_SHADOW_MODE */
unsigned long mach, pgprot_t flags);
#define set_fixmap(idx, phys) \
__set_fixmap(idx, phys, PAGE_KERNEL)
+#ifndef CONFIG_XEN_SHADOW_MODE
+#define set_fixmap_ma(idx, phys) \
+ __set_fixmap_ma(idx, phys, PAGE_KERNEL)
+#define set_fixmap_ma_ro(idx, phys) \
+ __set_fixmap_ma(idx, phys, PAGE_KERNEL_RO)
+#else /* CONFIG_XEN_SHADOW_MODE */
#define __vms_set_fixmap_ma(idx, phys) \
__vms___set_fixmap_ma(idx, phys, PAGE_KERNEL)
#define __vms_set_fixmap_ma_ro(idx, phys) \
__vms___set_fixmap_ma(idx, phys, PAGE_KERNEL_RO)
+#endif /* CONFIG_XEN_SHADOW_MODE */
/*
* Some hardware wants to get fixmapped without caching.
*/
/*
* Change "struct page" to physical address.
*/
+#ifndef CONFIG_XEN_SHADOW_MODE
+#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+#define page_to_phys(page) (phys_to_machine(page_to_pseudophys(page)))
+#else /* CONFIG_XEN_SHADOW_MODE */
#define __vms_page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
#define __vms_page_to_machphys(page) (__vms_phys_to_machine(__vms_page_to_pseudophys(page)))
#define page_to_phys(page) (__vms_page_to_machphys(page))
+#endif /* CONFIG_XEN_SHADOW_MODE */
+#ifndef CONFIG_XEN_SHADOW_MODE
+#define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + \
+ (unsigned long) bio_offset((bio)))
+#else /* CONFIG_XEN_SHADOW_MODE */
#define __vms_bio_to_pseudophys(bio) (__vms_page_to_pseudophys(bio_page((bio))) + \
(unsigned long) bio_offset((bio)))
+#endif /* CONFIG_XEN_SHADOW_MODE */
+
+#ifndef CONFIG_XEN_SHADOW_MODE
+#define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + \
+ (unsigned long) (bv)->bv_offset)
+#else /* CONFIG_XEN_SHADOW_MODE */
#define __vms_bvec_to_pseudophys(bv) (__vms_page_to_pseudophys((bv)->bv_page) + \
(unsigned long) (bv)->bv_offset)
#define __vms_bvec_to_machphys(bv) (__vms_page_to_machphys((bv)->bv_page) + \
(unsigned long) (bv)->bv_offset)
+#endif /* CONFIG_XEN_SHADOW_MODE */
+#ifndef CONFIG_XEN_SHADOW_MODE
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
+ (((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
+ ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
+ bvec_to_pseudophys((vec2))))
+#else /* CONFIG_XEN_SHADOW_MODE */
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
(((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
((__vms_bvec_to_machphys((vec1)) + (vec1)->bv_len) == \
__vms_bvec_to_machphys((vec2))))
+#endif /* CONFIG_XEN_SHADOW_MODE */
extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
*
* Allow them on x86 for legacy drivers, though.
*/
+#ifndef CONFIG_XEN_SHADOW_MODE
+#define virt_to_bus(_x) phys_to_machine(__pa(_x))
+#define bus_to_virt(_x) __va(machine_to_phys(_x))
+#else /* CONFIG_XEN_SHADOW_MODE */
#define virt_to_bus(_x) __vms_phys_to_machine(__pa(_x))
#define bus_to_virt(_x) ({ BUG(); __va((_x)); })
+#endif /* CONFIG_XEN_SHADOW_MODE */
/*
* readX/writeX() are used to access memory mapped devices. On some
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
-#ifndef BUG
+#ifdef CONFIG_XEN_SHADOW_MODE
#include <asm/bug.h>
-#endif
+#endif /* CONFIG_XEN_SHADOW_MODE */
#include <linux/config.h>
#include <linux/string.h>
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
+#ifndef CONFIG_XEN_SHADOW_MODE
+extern unsigned int *phys_to_machine_mapping;
+#define pfn_to_mfn(_pfn) ((unsigned long)(phys_to_machine_mapping[(_pfn)]))
+#define mfn_to_pfn(_mfn) ((unsigned long)(machine_to_phys_mapping[(_mfn)]))
+static inline unsigned long phys_to_machine(unsigned long phys)
+#else /* CONFIG_XEN_SHADOW_MODE */
extern unsigned int *__vms_phys_to_machine_mapping;
#define __vms_pfn_to_mfn(_pfn) ((unsigned long)(__vms_phys_to_machine_mapping[(_pfn)]))
#define __vms_mfn_to_pfn(_mfn) ({ BUG(); ((unsigned long)(__vms_machine_to_phys_mapping[(_mfn)])); })
static inline unsigned long __vms_phys_to_machine(unsigned long phys)
+#endif /* CONFIG_XEN_SHADOW_MODE */
{
+#ifndef CONFIG_XEN_SHADOW_MODE
+ unsigned long machine = pfn_to_mfn(phys >> PAGE_SHIFT);
+#else /* CONFIG_XEN_SHADOW_MODE */
unsigned long machine = __vms_pfn_to_mfn(phys >> PAGE_SHIFT);
+#endif /* CONFIG_XEN_SHADOW_MODE */
machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
return machine;
}
+#ifndef CONFIG_XEN_SHADOW_MODE
+static inline unsigned long machine_to_phys(unsigned long machine)
+#else /* CONFIG_XEN_SHADOW_MODE */
static inline unsigned long __vms_machine_to_phys(unsigned long machine)
+#endif /* CONFIG_XEN_SHADOW_MODE */
{
+#ifndef CONFIG_XEN_SHADOW_MODE
+ unsigned long phys = mfn_to_pfn(machine >> PAGE_SHIFT);
+#else /* CONFIG_XEN_SHADOW_MODE */
unsigned long phys = __vms_mfn_to_pfn(machine >> PAGE_SHIFT);
+#endif /* CONFIG_XEN_SHADOW_MODE */
phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
return phys;
}
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
#define boot_pte_t pte_t /* or would you rather have a typedef */
+#ifndef CONFIG_XEN_SHADOW_MODE
+#define pte_val(x) (((x).pte_low & 1) ? machine_to_phys((x).pte_low) : \
+ (x).pte_low)
+#define pte_val_ma(x) ((x).pte_low)
+#else /* CONFIG_XEN_SHADOW_MODE */
#define pte_val(x) ((x).pte_low)
#define __vms_pte_val_ma(x) ((x).pte_low)
+#endif /* CONFIG_XEN_SHADOW_MODE */
#define HPAGE_SHIFT 22
#endif
#define PTE_MASK PAGE_MASK
static inline unsigned long pmd_val(pmd_t x)
{
+#ifndef CONFIG_XEN_SHADOW_MODE
+ unsigned long ret = x.pmd;
+ if (ret) ret = machine_to_phys(ret);
+ return ret;
+#else /* CONFIG_XEN_SHADOW_MODE */
return x.pmd;
+#endif /* CONFIG_XEN_SHADOW_MODE */
}
#define pgd_val(x) ({ BUG(); (unsigned long)0; })
#define pgprot_val(x) ((x).pgprot)
static inline pte_t __pte(unsigned long x)
{
+#ifndef CONFIG_XEN_SHADOW_MODE
+ if (x & 1) x = phys_to_machine(x);
+#endif /* ! CONFIG_XEN_SHADOW_MODE */
return ((pte_t) { (x) });
}
+#ifndef CONFIG_XEN_SHADOW_MODE
+#define __pte_ma(x) ((pte_t) { (x) } )
+#endif /* ! CONFIG_XEN_SHADOW_MODE */
static inline pmd_t __pmd(unsigned long x)
{
+#ifndef CONFIG_XEN_SHADOW_MODE
+ if ((x & 1)) x = phys_to_machine(x);
+#endif /* ! CONFIG_XEN_SHADOW_MODE */
return ((pmd_t) { (x) });
}
#define __pgd(x) ({ BUG(); (pgprot_t) { 0 }; })
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
/* VIRT <-> MACHINE conversion */
+#ifndef CONFIG_XEN_SHADOW_MODE
+#define virt_to_machine(_a) (phys_to_machine(__pa(_a)))
+#define machine_to_virt(_m) (__va(machine_to_phys(_m)))
+#else /* CONFIG_XEN_SHADOW_MODE */
#define __vms_virt_to_machine(_a) (__vms_phys_to_machine(__pa(_a)))
#define __vms_machine_to_virt(_m) (__va(__vms_machine_to_phys(_m)))
+#endif /* CONFIG_XEN_SHADOW_MODE */
#endif /* __KERNEL__ */
set_pmd(pmd, __pmd(_PAGE_TABLE +
((unsigned long long)page_to_pfn(pte) <<
(unsigned long long) PAGE_SHIFT)));
+#ifndef CONFIG_XEN_SHADOW_MODE
+ flush_page_update_queue();
+#endif /* ! CONFIG_XEN_SHADOW_MODE */
}
/*
* Allocate and free page tables.
static inline void pte_free_kernel(pte_t *pte)
{
free_page((unsigned long)pte);
+#ifndef CONFIG_XEN_SHADOW_MODE
+ make_page_writable(pte);
+ flush_page_update_queue();
+#endif /* ! CONFIG_XEN_SHADOW_MODE */
}
extern void pte_free(struct page *pte);
* (pmds are folded into pgds so this doesn't get actually called,
* but the define is needed for a generic inline function.)
*/
+#ifndef CONFIG_XEN_SHADOW_MODE
+#define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval).pmd)
+#else /* CONFIG_XEN_SHADOW_MODE */
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
+#endif /* CONFIG_XEN_SHADOW_MODE */
#define set_pgd(pgdptr, pgdval) ((void)0)
#define pgd_page(pgd) \
{
pte_t pte = *xp;
if (pte.pte_low)
+#ifndef CONFIG_XEN_SHADOW_MODE
+ set_pte(xp, __pte_ma(0));
+#else /* CONFIG_XEN_SHADOW_MODE */
set_pte(xp, __pte(0));
+#endif /* CONFIG_XEN_SHADOW_MODE */
return pte;
}
*/
#define INVALID_P2M_ENTRY (~0U)
#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
+#ifndef CONFIG_XEN_SHADOW_MODE
+#define pte_pfn(_pte) \
+({ \
+ unsigned long mfn = (_pte).pte_low >> PAGE_SHIFT; \
+ unsigned long pfn = mfn_to_pfn(mfn); \
+ if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn)) \
+ pfn = max_mapnr; /* special: force !pfn_valid() */ \
+ pfn; \
+})
+#else /* CONFIG_XEN_SHADOW_MODE */
#define pte_pfn(_pte) ((_pte).pte_low >> PAGE_SHIFT)
+#endif /* CONFIG_XEN_SHADOW_MODE */
#define pte_page(_pte) pfn_to_page(pte_pfn(_pte))
#define pte_none(x) (!(x).pte_low)
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#ifndef CONFIG_XEN_SHADOW_MODE
+#define pfn_pte_ma(pfn, prot) __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#endif /* ! CONFIG_XEN_SHADOW_MODE */
#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
/*
#define pmd_page_kernel(pmd) \
((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+#ifndef CONFIG_XEN_SHADOW_MODE
+#define pmd_clear(xp) do { \
+ set_pmd(xp, __pmd(0)); \
+ xen_flush_page_update_queue(); \
+} while (0)
+#else /* CONFIG_XEN_SHADOW_MODE */
#define pmd_clear(xp) do { \
set_pmd(xp, __pmd(0)); \
} while (0)
+#endif /* CONFIG_XEN_SHADOW_MODE */
#ifndef CONFIG_DISCONTIGMEM
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
void make_pages_readonly(void *va, unsigned int nr);
void make_pages_writable(void *va, unsigned int nr);
+#ifndef CONFIG_XEN_SHADOW_MODE
+#define arbitrary_virt_to_machine(__va) \
+({ \
+ pgd_t *__pgd = pgd_offset_k((unsigned long)(__va)); \
+ pmd_t *__pmd = pmd_offset(__pgd, (unsigned long)(__va)); \
+ pte_t *__pte = pte_offset_kernel(__pmd, (unsigned long)(__va)); \
+ unsigned long __pa = (*(unsigned long *)__pte) & PAGE_MASK; \
+ __pa | ((unsigned long)(__va) & (PAGE_SIZE-1)); \
+})
+#else /* CONFIG_XEN_SHADOW_MODE */
#define __vms_arbitrary_virt_to_machine(__va) \
({ \
pgd_t *__pgd = pgd_offset_k((unsigned long)(__va)); \
unsigned long __pa = (*(unsigned long *)__pte) & PAGE_MASK; \
__vms_phys_to_machine(__pa) | ((unsigned long)(__va) & (PAGE_SIZE-1)); \
})
+#endif /* CONFIG_XEN_SHADOW_MODE */
+#ifdef CONFIG_XEN_SHADOW_MODE
#define arbitrary_virt_to_phys(__va) \
({ \
pgd_t *__pgd = pgd_offset_k((unsigned long)(__va)); \
unsigned long __pa = (*(unsigned long *)__pte) & PAGE_MASK; \
(__pa) | ((unsigned long)(__va) & (PAGE_SIZE-1)); \
})
+#endif /* CONFIG_XEN_SHADOW_MODE */
#endif /* !__ASSEMBLY__ */
* machine->physical mapping table starts at this address, read-only.
*/
#define HYPERVISOR_VIRT_START (0xFC000000UL)
+#ifndef CONFIG_XEN_SHADOW_MODE
+#ifndef machine_to_phys_mapping
+#define machine_to_phys_mapping ((u32 *)HYPERVISOR_VIRT_START)
+#endif
+#else /* CONFIG_XEN_SHADOW_MODE */
#ifndef __vms_machine_to_phys_mapping
#define __vms_machine_to_phys_mapping ((u32 *)HYPERVISOR_VIRT_START)
#endif
+#endif /* CONFIG_XEN_SHADOW_MODE */
#ifndef __ASSEMBLY__